We have three tasks during the fMRI session which we need to analyze for our study as mentioned in the Introduction section. These tasks include a 2-Back, Oddball, and Associative Retrieval. Each requires a slightly different approach so we do some data cleaning in chunks of code before we start.
Data Cleaning
We first import the data from individual logs. To this end, we have a small function that reads a file and assigns the subject ID. We compile the trial-by-trial logs for all subjects into a single data frame, which we then subset for each task separately. Finally, we combine this data frame with the balance data frame from the previous section.
# First get a list of all the MRI log files (one per subject/session/run)
logs_beh <- Sys.glob("/project/3013068.02/data/*/logs/mri/*mri*")

# Read one log file and attach subject + session identifiers parsed from the
# file path.
#   file      - full path to one log file
#   separator - field separator passed on to read.csv2()
#   headers   - logical; whether the file has a header row
# Returns the imported data frame with added `id` (subject) and `ses`
# (session) columns.
read_add <- function(file, separator, headers){
  df <- read.csv2(file = file, sep = separator, header = headers)
  # Subject ID: the path component between ".../sub_" and "/logs/..."
  id <- file
  id <- sub(".*sub_", "", id)
  id <- sub("/logs/.*", "", id)
  df$id <- id
  # Session indicator: the file-name part between "logs/mri/Sub" and ".txt"
  ses <- file
  ses <- sub(".*logs/mri/Sub", "", ses)
  ses <- sub(".txt.*", "", ses)
  df$ses <- ses
  df
}

# Read every log and stack them into a single data frame
df_task <- rbindlist(lapply(logs_beh, read_add, separator = "\t", headers = FALSE))
# Column headers for the combined log data
names(df_task) <- c("task", "stimulus", "valence", "trial_onset", "trial_offset", "rt", "button_pressed", "button_correct",
                    "block_onset", "block_offset", "experiment_start", "experiment_end", "sub_id", "session_id")
# Drop the header rows picked up during import, split the session string into
# its components, and let retype() re-guess the column types
df_task <- df_task %>% subset(task != "Trial_Type" & task != "Trial_Type " ) %>% separate(session_id, c("sub", "mri", "session","run")) %>% retype()
# Subject as factor plus a zero-padded "sub_XXX" label for merging later
df_task$sub_id <- as.factor(df_task$sub_id)
df_task$sub_nr <- paste("sub", (str_pad(df_task$sub_id, 3, pad = "0")), sep="_")
# Label the tasks (spelled-out `levels=` instead of partially-matched `level=`)
df_task$Task <- factor(df_task$task, levels=c(1,2,3), labels=c("oddball", "nback", "memory"))
# Label the session and runs
df_task$Session <- factor(df_task$session, levels=c("s1","s2"), labels=c("Control", "Stress"))
df_task$Run <- factor(df_task$run, levels=c("r1","r2","r3","r4","r5","r6"), labels=c("r1","r2","r3","r4","r5","r6"))
df_task$run <- as.numeric(df_task$Run)
# Runs 1-3 form the "Early" phase, runs 4-6 the "Late" phase
# (the duplicated `df_task$run <- as.numeric(df_task$Run)` line was removed)
df_task$Phase <- "Late"
df_task$Phase[df_task$Run =="r1" | df_task$Run =="r2" | df_task$Run =="r3"] <- "Early"
# Widen the fMRI stress data so each Network (ECN/SN/DMN) gets its own signal
# column, then keep columns 1:11 plus 15 by position.
# NOTE(review): positional selection depends on pivot_wider's output order —
# confirm it still holds if the Network factor levels ever change.
df_fmri.balance_4tasks <- df_fmri.stress %>% pivot_wider(id_cols=c(id, age, sex, contraceptive_use, first_scan, Session, Run), names_from=c(Network), values_from=c(Signal,resBIN,AUCi_difference) ) %>% dplyr::select(1:11,15)
# Stable column names matching the task data frame
names(df_fmri.balance_4tasks ) <- c("sub_nr","age","sex", "contraceptive_use", "Scan_Order", "Session", "Run", "ECN", "SN", "DMN", "resBIN", "AUCi_diff")
# Network difference scores; drop rows without demographic info
df_fmri.balance_4tasks <- df_fmri.balance_4tasks %>% dplyr::mutate(SN_ECN=SN-ECN, ECN_DMN=ECN-DMN, SN_DMN=SN-DMN) %>% dplyr::filter(!is.na(sex))
# Merge the brain data into the task logs; the fMRI "Run" column is matched
# against the behavioural "Phase" (Early/Late) column
df_task <- merge(df_task, df_fmri.balance_4tasks, by.x=c("sub_nr", "Session", "Phase"), by.y=c("sub_nr", "Session", "Run"))
Nback
We start off with the n-back task. We first correct the n-back trials for baseline reaction speed, using the oddball reaction time from each run as the correction factor. We then also calculate a measure that combines speed and accuracy. Here we use the LIASES method, which is a bit complicated, so we do this in a separate chunk.
# Per subject/session/run: mean oddball RT as a baseline speed measure, then
# keep only nback trials with a plausible RT.
# NOTE(review): `rt > 200` is applied before the Miss/CorrRej flags below; if
# no-response trials are logged with rt = 0 they are removed here and Miss
# counts will always be empty — confirm this is intended.
df_task.nback <- df_task %>% group_by(sub_id, Session, Run) %>% dplyr::mutate(odd_mean_rt=mean(ifelse(Task=="oddball", rt, NA), na.rm=TRUE)) %>% ungroup() %>% subset(Task=="nback") %>% subset(rt>200)
# RT corrected for each run's baseline (oddball) speed
df_task.nback$rt_corr <- df_task.nback$rt/df_task.nback$odd_mean_rt
# Signal-detection flags; each column is 1 where the condition holds, NA otherwise
df_task.nback$Hit[df_task.nback$button_pressed>0 & df_task.nback$button_correct>0] <- 1
df_task.nback$Miss[df_task.nback$button_pressed==0 & df_task.nback$button_correct>0] <- 1
df_task.nback$FA[df_task.nback$button_pressed>0 & df_task.nback$button_correct==0] <- 1
df_task.nback$CorrRej[df_task.nback$button_pressed==0 & df_task.nback$button_correct==0] <- 1
We now calculate the LIASES score as detailed in the methods paper by André Vandierendonck (2017). We need the RT by participant/condition, the SD of both the RTs and the PEs, and the total PE per condition, so we can then compare this measure between runs and conditions. The formula is RTj + (SD(RT)/SD(PE)) * PEj, where j = condition. So I first group by subject to get the RT SD, then group by session and run to get the PE and RT per condition, and finally I get the overall PE from all conditions.
# LIASES (Vandierendonck, 2017): RT_j + (SD(RT)/SD(PE)) * PE_j, j = condition.
# Steps: per-subject RT SD -> per subject/session/run PE ((FA + Miss)/trials)
# and mean RT -> per-subject PE SD -> combined score.
# NOTE(review): rt > 200 was already enforced upstream, so the extra
# subset(rt>200) here looks redundant — confirm before removing.
df_task.nback <- df_task.nback %>% group_by(sub_id) %>% dplyr::mutate(rt_sd=sd(ifelse(rt!=0, rt, NA), na.rm=TRUE)) %>% group_by(sub_id, Session, Run) %>% dplyr::mutate(pe=((sum(FA, na.rm=TRUE)+ sum(Miss, na.rm=TRUE))/length(rt)), rt_mean=mean(ifelse(rt!=0, rt, NA), na.rm=TRUE)) %>% ungroup %>% group_by(sub_id) %>% dplyr::mutate(pe_sd=sd(pe, na.rm=TRUE)) %>% subset(rt>200) %>% ungroup %>% dplyr::mutate(LIASES=rt_mean+(rt_sd/pe_sd)*pe) %>% ungroup()
Memory
For the memory tasks, we have trials with negative, and others with neutral images. So the goal is to look at RT’s, as well as the percent of correct responses. We subset the memory trials from the main data frame here, and then calculate all the measures we need for analysis.
# Keep only the memory-task trials
df_task.mem <- df_task %>% subset(Task=="memory")
# Label the valence factor
df_task.mem$valence <- factor(df_task.mem$valence, levels=c(1,2), labels=c("neutral","negative"))
# Indicator columns (1 where true, NA otherwise) used for the sums below.
# NOTE(review): `corr_resp` treats any button_pressed == button_correct match
# as correct, including 0 == 0 (no response, no target) — confirm intended.
df_task.mem$corr_resp[df_task.mem$button_correct==df_task.mem$button_pressed] <- 1
df_task.mem$incorr_resp[df_task.mem$button_correct!=df_task.mem$button_pressed] <- 1
df_task.mem$mem_neu_n[df_task.mem$valence=="neutral"] <- 1
df_task.mem$mem_neg_n[df_task.mem$valence=="negative"] <- 1
# Per subject/session/run: trial counts, correct counts, and percent correct,
# overall and split by valence
df_task.mem <- df_task.mem %>% group_by(sub_id, Session, Run) %>% dplyr::mutate(
mem_neu=sum(ifelse(valence=="neutral", mem_neu_n, NA), na.rm=TRUE),
mem_neg=sum(ifelse(valence=="negative", mem_neg_n, NA), na.rm=TRUE),
mem_tot=mem_neg+mem_neu,
mem_neu_corr=sum(ifelse(valence=="neutral", corr_resp, NA), na.rm=TRUE),
mem_neg_corr=sum(ifelse(valence!="neutral", corr_resp, NA), na.rm=TRUE),
mem_tot_corr=mem_neg_corr+mem_neu_corr,
mem_neg_per=mem_neg_corr/mem_neg,
mem_neu_per=mem_neu_corr/mem_neu,
mem_tot_per=(mem_neu_corr+mem_neg_corr)/mem_tot) %>% ungroup
Oddball
We also need to import the data logs for the oddball. The main outcome measure for this task is the oddball recall done outside the scanner, which we analyze a little differently. For this, we need to calculate dprime. I will also be looking at the RTs here for the individual trials during scanning here. So in addition to the previous data frame from the scan, we also import the data frame from outside the scanner. This was already done in python, so we just need to bring in the new data frame with some minor cleaning. We also calculate dprime for the overall performance, and also the performance by valence
# Oddball recall data (pre-processed in Python outside the scanner)
df_task.odd <- read.csv("/project/3013068.02/stats/fMRI/Behavioral/fmri_recall.csv", sep="\t", header=TRUE)
# Combine session x run into a single 1-4 scan index (early/late per session)
df_task.odd$Scan[df_task.odd$session==1 & df_task.odd$run==1] <- 1
df_task.odd$Scan[df_task.odd$session==1 & df_task.odd$run==2] <-2
df_task.odd$Scan[df_task.odd$session==2 & df_task.odd$run==1] <-3
df_task.odd$Scan[df_task.odd$session==2 & df_task.odd$run==2] <-4
# Turn session, scan, and run into labelled factors
df_task.odd$Session <- factor(df_task.odd$session, levels=c(1,2),
labels=c('Control', 'Stress'))
df_task.odd$Scan <- factor(df_task.odd$Scan, levels=c(1,2, 3,4),
labels=c('Control-Early','Control-Late',
'Stress-Early', 'Stress-Late'))
df_task.odd$Run <- factor(df_task.odd$run, levels=c(1,2),
labels=c("Early","Late"))
# Merge in the MRI signal data (here Run already codes Early/Late)
df_task.odd <- merge(df_task.odd, df_task.odd_dprime_helper <- df_fmri.balance_4tasks, by.x=c("sub_nr", "Session", "Run"), by.y=c("sub_nr", "Session", "Run"))
# d-prime calculation
# Drop incomplete rows first; dprime() cannot handle NAs
df_task.odd <- na.omit(df_task.odd)
# Snapshot the inputs so the cbind()s below do not affect later dprime() calls
df_task.odd_temp <- df_task.odd
# d-prime for negative items; suffix the new columns with "_neg"
df_task.odd_dprime_neg <-as.data.frame(dprime(df_task.odd_temp$odd_hits_neg, df_task.odd_temp$odd_FA_neg, n_miss=df_task.odd_temp$odd_miss_neg, n_cr=df_task.odd_temp$odd_correj_neg))
colnames(df_task.odd_dprime_neg) <- paste(colnames(df_task.odd_dprime_neg), "neg", sep = "_")
df_task.odd <- cbind(df_task.odd, df_task.odd_dprime_neg)
# d-prime for positive items; suffix the new columns with "_pos"
df_task.odd_dprime_pos <- as.data.frame(dprime(df_task.odd_temp$odd_hits_pos, df_task.odd_temp$odd_FA_pos, n_miss=df_task.odd_temp$odd_miss_pos, n_cr=df_task.odd_temp$odd_correj_pos))
colnames(df_task.odd_dprime_pos) <- paste(colnames(df_task.odd_dprime_pos), "pos", sep = "_")
df_task.odd <- cbind(df_task.odd, df_task.odd_dprime_pos)
# Overall d-prime across valences, unsuffixed
df_task.odd_dprime <- dprime(df_task.odd_temp$odd_hits, df_task.odd_temp$odd_FA, n_miss=df_task.odd_temp$odd_miss, n_cr=df_task.odd_temp$odd_correj)
df_task.odd <- cbind(df_task.odd, df_task.odd_dprime)
Stats
# Build and fit a (g)lmer model for one brain-signal / outcome combination.
#   signal - name of the brain-signal column to add as an interaction term
#            (e.g. "ECN"), or "" for the behaviour-only model
#   dv     - dependent variable (left-hand side of the formula)
#   iv     - fixed-effects string; when a signal is used the caller passes a
#            string ending in "*" so the signal is appended as an interaction
#   re     - random-effects slope specification
#   sub_id - grouping variable for the random effects
#   fam    - the string "lmer" for a Gaussian linear mixed model, or a GLM
#            family object (e.g. poisson()) to fit with glmer
#   data   - data frame holding all model variables
# Returns the fitted merMod object.
fx.par_model <- function(signal, dv, iv, re, sub_id, fam, data){
  # Left-hand side (outcome) and right-hand side (fixed + random) parts
  lhs_formula <- dv
  fixed_part  <- paste0(iv, signal)
  random_part <- paste0("(", re, "|", sub_id, ")")
  # Assemble the full formula string: "dv ~ fixed + (re | sub_id)"
  full_form <- paste(lhs_formula, paste(fixed_part, random_part, sep = "+"), sep = "~")
  # `fam` may be a family object (a list), so only compare against "lmer"
  # when it is a character string; the old `fam == "lmer"` comparison was the
  # source of the "condition has length > 1" warnings
  if (is.character(fam) && fam == "lmer") {
    run_model <- lmer(as.formula(full_form),
                      contrasts = list(Session = "contr.treatment"),
                      data = data,
                      control = lmerControl(calc.derivs = FALSE, optCtrl = list(maxfun = 100000), optimizer = "bobyqa"))
  } else {
    run_model <- glmer(as.formula(full_form),
                       contrasts = list(Session = "contr.treatment"),
                       data = data,
                       family = fam,
                       control = glmerControl(calc.derivs = FALSE, optCtrl = list(maxfun = 100000), optimizer = "bobyqa"))
  }
  run_model
}
Nback
For the nback, we will look at three measures: The reaction time (RT), the number of errors (PE), and the combined score using the LIASES method to combine accuracy and speed. We’ll run the RT model on individual trials where there was a response (so only response trials, without factoring in errors). We then look at the error rate using the proportion of misses and false alarms in the overall number of trials, and finally the LIASES that combines these effects, while also account for within subject deviations. These have all been calculated above, so we can just build the models here, and check the results.
Reaction Time
We first filter out the non-responses and the responses that are too early. I also z-transform and rescale the responses to make it easier to apply different types of fits. We then look at the distributions, as that will help figure out the optimal models to fit. It appears that there is overall no real effect on reaction time, which I find strange looking at the graphs. It is OK though, I guess. We don't run any post-hoc tests, since there are no main effects in our model either.
Hist.
# Keep only real responses (RT > 200 ms; non-responses are logged as 0)
df_task.nback_rts <- subset(df_task.nback, rt>200)
# z-transform, then rescale into a strictly positive range so Gamma-family
# fits are possible
# NOTE(review): scale() returns a one-column matrix, so rt_z/rt_s are matrix
# columns — lme4 accepts this, but confirm downstream code does too
df_task.nback_rts$rt_z <- scale(df_task.nback_rts$rt, center=TRUE, scale=TRUE)
df_task.nback_rts$rt_s <- scales::rescale(df_task.nback_rts$rt_z, to=c(0.01,7))
# Check the RT distribution before choosing a model family
hist((df_task.nback_rts$rt))

Models
# Contrasts to investigate: "" = behaviour-only model, "ECN" = add the signal
signals <- list("", "ECN")
# Fit both RT models in parallel; X is a length-1 string, so plain `if`/`else`
# is used instead of the vectorised ifelse()
models.nback_rt <- mcmapply(signals, FUN = function(X) {
  fx.par_model(signal = X,
               dv = "rt_s",
               iv = if (X == "") "Session*Phase" else "1+Session*Phase*",
               re = if (X == "") "1+Session*Phase" else paste0("1+Session*Phase+", X),
               sub_id = "sub_id",
               fam = Gamma(link = "inverse"),
               data = df_task.nback_rts)
}, mc.cores = 5)
the condition has length > 1 and only the first element will be usedthe condition has length > 1 and only the first element will be used
# HTML summary table for both RT models (behaviour-only vs. with ECN)
tab.2back_rt <- tab_model(models.nback_rt, dv.labels=c("Signal", "Signal ~ ECN"), show.stat=T, show.se=T)
knitr::asis_output(tab.2back_rt$knitr)
|
|
Signal
|
Signal ~ ECN
|
|
Predictors
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
1.89
|
0.03
|
1.83 – 1.95
|
39.33
|
<0.001
|
1.90
|
0.03
|
1.83 – 1.96
|
37.76
|
<0.001
|
|
Session [Stress]
|
1.00
|
0.01
|
0.98 – 1.02
|
0.26
|
0.795
|
1.00
|
0.01
|
0.98 – 1.02
|
-0.07
|
0.946
|
|
Phase1
|
1.01
|
0.01
|
1.00 – 1.02
|
2.10
|
0.036
|
1.01
|
0.01
|
0.99 – 1.02
|
0.99
|
0.324
|
|
Session [Stress] * Phase1
|
1.00
|
0.01
|
0.98 – 1.01
|
-0.29
|
0.770
|
0.99
|
0.01
|
0.98 – 1.01
|
-0.58
|
0.561
|
|
ECN
|
|
|
|
|
|
0.99
|
0.01
|
0.97 – 1.00
|
-1.38
|
0.166
|
|
Session [Stress] * ECN
|
|
|
|
|
|
1.00
|
0.01
|
0.98 – 1.02
|
0.04
|
0.966
|
|
Phase1 * ECN
|
|
|
|
|
|
1.01
|
0.01
|
0.99 – 1.02
|
1.25
|
0.213
|
(Session [Stress] Phase1) ECN
|
|
|
|
|
|
1.00
|
0.01
|
0.98 – 1.02
|
0.39
|
0.694
|
|
Random Effects
|
|
σ2
|
0.29
|
0.29
|
|
τ00
|
0.02 sub_id
|
0.02 sub_id
|
|
τ11
|
0.00 sub_id.Session1
|
0.00 sub_id.Session1
|
|
|
0.00 sub_id.Phase1
|
0.00 sub_id.Phase1
|
|
|
0.00 sub_id.Session1:Phase1
|
0.00 sub_id.ECN
|
|
|
|
0.00 sub_id.Session1:Phase1
|
|
ρ01
|
0.07
|
0.10
|
|
|
-0.04
|
-0.05
|
|
|
0.04
|
0.09
|
|
|
|
0.05
|
|
ICC
|
0.05
|
|
|
N
|
70 sub_id
|
70 sub_id
|
|
Observations
|
18456
|
18456
|
|
Marginal R2 / Conditional R2
|
0.000 / 0.053
|
0.001 / NA
|
Post-Hoc
# F-tests for the ECN slope within each Session x Phase cell
joint_tests(models.nback_rt[[2]], by=c("Session", "Phase"))
Session = Control, Phase = Early:
model term df1 df2 F.ratio p.value
ECN 1 Inf 0.046 0.8298
Session = Stress, Phase = Early:
model term df1 df2 F.ratio p.value
ECN 1 Inf 0.042 0.8380
Session = Control, Phase = Late:
model term df1 df2 F.ratio p.value
ECN 1 Inf 3.686 0.0549
Session = Stress, Phase = Late:
model term df1 df2 F.ratio p.value
ECN 1 Inf 3.444 0.0635
Plot: Main
# Within-subject summary (mean + SE) of raw RT per Session x Phase
df_task.nback_summ <- summarySEwithin(data=df_task.nback_rts, measurevar = "rt", idvar="sub_id", withinvars=c("Session", "Phase"), na.rm=FALSE)
# Numeric Phase so geom_line() can connect Early -> Late
df_task.nback_summ$Phase <- as.numeric(df_task.nback_summ$Phase)
# Put Stress first so it is drawn/coloured consistently with the other plots
df_task.nback_summ$Session <- relevel(df_task.nback_summ$Session, "Stress")
ggplot(df_task.nback_summ, aes(x=Phase, y=rt, color=Session, fill=Session)) +
geom_line() +
geom_errorbar(aes(ymin=rt-se, ymax=rt+se, color=Session),size=1, width=0.20, na.rm=T)+
ggtheme2

Plot: ECN Phase
# Scaled RT as a function of ECN activity, with linear fits split by phase
ggplot(df_task.nback_rts, aes(y=rt_s, x=ECN, colour=Phase)) + geom_smooth(method="lm") + scale_color_manual(values=stress_colors) + ggtheme2

# F-tests for Session, ECN, and their interaction within each Phase
joint_tests(models.nback_rt[[2]], by=c("Phase"))
Phase = Early:
model term df1 df2 F.ratio p.value
Session 1 Inf 0.112 0.7380
ECN 1 Inf 0.000 0.9826
Session:ECN 1 Inf 0.094 0.7596
Phase = Late:
model term df1 df2 F.ratio p.value
Session 1 Inf 0.061 0.8056
ECN 1 Inf 6.281 0.0122
Session:ECN 1 Inf 0.052 0.8204
# ECN slopes per Phase (averaged over Session) plus their pairwise contrast
emtrends(models.nback_rt[[2]], pairwise ~ Phase, var="ECN")
NOTE: Results may be misleading due to involvement in interactions
$emtrends
Phase ECN.trend SE df asymp.LCL asymp.UCL
Early -0.000172 0.00789 Inf -0.0156 0.01529
Late -0.022360 0.00892 Inf -0.0398 -0.00487
Results are averaged over the levels of: Session
Confidence level used: 0.95
$contrasts
contrast estimate SE df z.ratio p.value
Early - Late 0.0222 0.0105 Inf 2.106 0.0352
Results are averaged over the levels of: Session
# Scaled RT vs. SN activity by Session, faceted by Phase
# NOTE(review): this plots SN although the surrounding section discusses the
# ECN signal — confirm the intended network
ggplot(df_task.nback_rts, aes(y=rt_s, x=SN, colour=Session)) + geom_smooth(method="lm") + scale_color_manual(values=stress_colors) + facet_grid(.~Phase) + ggtheme2

Proportion of Errors
For the error rates, we can't really take the trial-by-trial approach like we do with the reaction times, so instead we look at the error rates per run/condition. We can use this filtered data frame later too for LIASES, since we have a similar measure per session*run. We first calculate the number of errors instead of the proportion. That's mainly because the proportion doesn't fit well in the models, and isn't an accurate representation given the zero-inflation. So we use counts instead; that way we can use a Poisson model, which fits the data much better.
# Per-run summary frame for the error-count and LIASES models
df_task.nback_sum <- df_task.nback_rts
# Count FAs and Misses per subject/session/run, then collapse to one row per
# cell via unique()
df_task.nback_sum <- df_task.nback_sum %>% dplyr::select("sub_id", "Session", "Phase", "Run","run", "LIASES","pe", "FA", "Miss", "ECN", "SN", "SN_ECN") %>% group_by(sub_id, Session, Run) %>% dplyr::mutate(FA_count=sum(FA, na.rm=TRUE), Miss_count=sum(Miss, na.rm=TRUE), error_count=(FA_count+Miss_count)) %>% ungroup() %>% dplyr::select("sub_id", "Session", "Phase", "Run","run", "LIASES","pe", "error_count", "ECN", "SN", "SN_ECN") %>% unique()
# z-scored versions for the Gaussian-family models
df_task.nback_sum$LIASES_z <- scale(df_task.nback_sum$LIASES, center=TRUE, scale=TRUE)
df_task.nback_sum$pe_z <- scale(df_task.nback_sum$pe, center=TRUE, scale=TRUE)
Models
# Contrasts to investigate: "" = behaviour-only model, "ECN" = add the signal
signals <- list("", "ECN")
# Run models in parallel; X is a length-1 string, so plain `if`/`else` is used
# instead of the vectorised ifelse()
models.nback_pe <- mcmapply(signals, FUN = function(X) {
  fx.par_model(signal = X,
               dv = "error_count",
               iv = if (X == "") "Session*Phase" else "1+Session*Phase*",
               re = if (X == "") "1+Session*Phase" else paste0("1+Session*Phase+", X),
               sub_id = "sub_id",
               fam = poisson(),
               data = df_task.nback_sum)
}, mc.cores = 5)
the condition has length > 1 and only the first element will be usedthe condition has length > 1 and only the first element will be used
# HTML summary table for both error-count models
# NOTE(review): dv.labels=signals passes list("", "ECN"), so the first column
# header is blank — confirm intended
tab.2back_pe <- tab_model(models.nback_pe, dv.labels=signals, show.se=T, show.stat=T)
asis_output(tab.2back_pe$knitr)
|
|
|
ECN
|
|
Predictors
|
Incidence Rate Ratios
|
std. Error
|
CI
|
Statistic
|
p
|
Incidence Rate Ratios
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
0.99
|
0.10
|
0.82 – 1.20
|
-0.09
|
0.931
|
0.95
|
0.10
|
0.77 – 1.18
|
-0.46
|
0.646
|
|
Session [Stress]
|
0.89
|
0.07
|
0.76 – 1.04
|
-1.51
|
0.131
|
0.95
|
0.09
|
0.79 – 1.15
|
-0.53
|
0.595
|
|
Phase1
|
1.01
|
0.05
|
0.92 – 1.10
|
0.16
|
0.875
|
0.99
|
0.06
|
0.87 – 1.11
|
-0.24
|
0.812
|
|
Session [Stress] * Phase1
|
0.96
|
0.07
|
0.84 – 1.11
|
-0.53
|
0.594
|
1.01
|
0.09
|
0.85 – 1.19
|
0.12
|
0.908
|
|
ECN
|
|
|
|
|
|
1.06
|
0.09
|
0.91 – 1.25
|
0.76
|
0.444
|
|
Session [Stress] * ECN
|
|
|
|
|
|
0.91
|
0.09
|
0.74 – 1.11
|
-0.95
|
0.345
|
|
Phase1 * ECN
|
|
|
|
|
|
1.03
|
0.07
|
0.90 – 1.18
|
0.46
|
0.647
|
(Session [Stress] Phase1) ECN
|
|
|
|
|
|
0.96
|
0.09
|
0.80 – 1.16
|
-0.43
|
0.668
|
|
Random Effects
|
|
σ2
|
0.72
|
0.72
|
|
τ00
|
0.58 sub_id
|
0.57 sub_id
|
|
τ11
|
0.03 sub_id.Session1
|
0.03 sub_id.Session1
|
|
|
0.00 sub_id.Phase1
|
0.01 sub_id.Phase1
|
|
|
0.01 sub_id.Session1:Phase1
|
0.03 sub_id.ECN
|
|
|
|
0.01 sub_id.Session1:Phase1
|
|
ρ01
|
-0.46
|
-0.47
|
|
|
-0.08
|
-0.23
|
|
|
0.20
|
-0.17
|
|
|
|
0.41
|
|
ICC
|
|
0.44
|
|
N
|
70 sub_id
|
70 sub_id
|
|
Observations
|
838
|
838
|
|
Marginal R2 / Conditional R2
|
0.005 / NA
|
0.004 / 0.447
|
Post-Hoc: Main
# Session contrasts within each Phase for the behaviour-only error model
emmeans(models.nback_pe[[1]], list(pairwise ~Session*Phase), by=c("Phase"), adjust = "tukey")
$`emmeans of Session | Phase`
Phase = Early:
Session emmean SE df asymp.LCL asymp.UCL
Control -0.00137 0.110 Inf -0.217 0.2140
Stress -0.15670 0.124 Inf -0.399 0.0858
Phase = Late:
Session emmean SE df asymp.LCL asymp.UCL
Control -0.01545 0.103 Inf -0.217 0.1859
Stress -0.09563 0.127 Inf -0.344 0.1527
Results are given on the log (not the response) scale.
Confidence level used: 0.95
$`pairwise differences of Session | Phase`
Phase = Early:
2 estimate SE df z.ratio p.value
Control - Stress 0.1553 0.1103 Inf 1.408 0.1590
Phase = Late:
2 estimate SE df z.ratio p.value
Control - Stress 0.0802 0.0996 Inf 0.805 0.4208
Results are given on the log (not the response) scale.
Post-Hoc: Brain
# F-tests for the ECN slope within each Session x Phase cell
joint_tests(models.nback_pe[[2]], by=c("Session", "Phase"))
Session = Control, Phase = Early:
model term df1 df2 F.ratio p.value
ECN 1 Inf 0.795 0.3725
Session = Stress, Phase = Early:
model term df1 df2 F.ratio p.value
ECN 1 Inf 0.199 0.6554
Session = Control, Phase = Late:
model term df1 df2 F.ratio p.value
ECN 1 Inf 0.082 0.7745
Session = Stress, Phase = Late:
model term df1 df2 F.ratio p.value
ECN 1 Inf 0.052 0.8199
Plot
# Within-subject summary (mean + SE) of error counts per Phase x Session
df.nback_error_sum <- summarySEwithin(data=df_task.nback_sum, measurevar="error_count", idvar="sub_id", withinvars= c("Phase", "Session"), na.rm=FALSE)
# Numeric Phase for positioning; Stress level first for consistent colours
df.nback_error_sum$Phase <- as.numeric(df.nback_error_sum$Phase)
df.nback_error_sum$Session <- relevel(df.nback_error_sum$Session, "Stress")
plot.nback_errors <- ggplot(df.nback_error_sum, aes(x=Phase, y=error_count, color=Session, fill=Session)) +
geom_bar(stat="summary", position="dodge2", alpha=0.5) +
geom_errorbar(aes(ymin=error_count-se, ymax=error_count+se, color=Session),size=1, width=0.45, na.rm=T, position=position_dodge(.9) ) + ggtheme2
plot.nback_errors

LIASES
The final measure we use to calculate the n-back performance is LIASES, which combines accuracy and speed. This method is mentioned earlier, and a data frame was subsetted in the chunk looking at the error rates, so we just need to build the model and run it here. I first tested different fits, and it appears that the inverse Gaussian is the best to fit this data.
Hist.
# Distribution check for the z-scored LIASES score before model selection
hist((df_task.nback_sum$LIASES_z))

Models
# Contrasts to investigate: behaviour-only ("") and with the ECN signal
signals <- list("", "ECN")
# Fit both LIASES models in parallel; X is a length-1 string, so plain
# `if`/`else` is used instead of the vectorised ifelse()
models.nback_liases <- mcmapply(signals, FUN = function(X) {
  fx.par_model(signal = X,
               dv = "LIASES_z",
               iv = if (X == "") "Session*Phase" else "1+Session*Phase*",
               re = if (X == "") "1+Session*Phase" else paste0("1+Session*Phase+", X),
               sub_id = "sub_id",
               fam = gaussian(link = "inverse"),
               data = df_task.nback_sum)
}, mc.cores = 5)
# HTML summary table for both LIASES models (behaviour-only vs. with ECN)
tab.2back_liases <- tab_model(models.nback_liases, dv.labels=c("Signal","Signal ~ ECN" ),show.stat=T, show.se=T)
asis_output(tab.2back_liases$knitr)
|
|
Signal
|
Signal ~ ECN
|
|
Predictors
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
0.17
|
0.21
|
-0.25 – 0.59
|
0.82
|
0.415
|
0.28
|
0.27
|
-0.24 – 0.81
|
1.05
|
0.292
|
|
Session [Stress]
|
-0.29
|
0.21
|
-0.71 – 0.13
|
-1.36
|
0.174
|
-0.22
|
0.26
|
-0.73 – 0.29
|
-0.84
|
0.399
|
|
Phase1
|
0.02
|
0.13
|
-0.24 – 0.28
|
0.12
|
0.904
|
-0.09
|
0.17
|
-0.42 – 0.24
|
-0.56
|
0.579
|
|
Session [Stress] * Phase1
|
-0.20
|
0.21
|
-0.61 – 0.21
|
-0.96
|
0.338
|
-0.02
|
0.24
|
-0.48 – 0.44
|
-0.08
|
0.935
|
|
ECN
|
|
|
|
|
|
-0.14
|
0.19
|
-0.52 – 0.23
|
-0.75
|
0.455
|
|
Session [Stress] * ECN
|
|
|
|
|
|
-0.31
|
0.26
|
-0.82 – 0.21
|
-1.17
|
0.240
|
|
Phase1 * ECN
|
|
|
|
|
|
0.15
|
0.17
|
-0.18 – 0.48
|
0.90
|
0.368
|
(Session [Stress] Phase1) ECN
|
|
|
|
|
|
-0.16
|
0.25
|
-0.64 – 0.33
|
-0.63
|
0.528
|
|
Random Effects
|
|
σ2
|
0.51
|
0.51
|
|
τ00
|
2.13 sub_id
|
2.57 sub_id
|
|
τ11
|
0.57 sub_id.Session1
|
0.53 sub_id.Session1
|
|
|
0.44 sub_id.Phase1
|
0.46 sub_id.Phase1
|
|
|
0.53 sub_id.Session1:Phase1
|
0.12 sub_id.ECN
|
|
|
|
0.38 sub_id.Session1:Phase1
|
|
ρ01
|
0.02
|
0.09
|
|
|
-0.12
|
-0.12
|
|
|
0.12
|
-0.74
|
|
|
|
0.17
|
|
ICC
|
0.83
|
0.84
|
|
N
|
70 sub_id
|
70 sub_id
|
|
Observations
|
838
|
838
|
|
Marginal R2 / Conditional R2
|
0.012 / 0.836
|
0.039 / 0.845
|
Post-Hoc: Behavioural
# Uncorrected Session contrasts within each Phase for the LIASES model
emmeans(models.nback_liases[[1]], pairwise ~ Session | Phase, adjust="none")
Note: Use 'contrast(regrid(object), ...)' to obtain contrasts of back-transformed estimates
$emmeans
Phase = Early:
Session emmean SE df asymp.LCL asymp.UCL
Control 0.1907 0.264 Inf -0.327 0.709
Stress -0.2990 0.232 Inf -0.754 0.156
Phase = Late:
Session emmean SE df asymp.LCL asymp.UCL
Control 0.1586 0.239 Inf -0.311 0.628
Stress 0.0675 0.286 Inf -0.492 0.627
Results are given on the inverse (not the response) scale.
Confidence level used: 0.95
$contrasts
Phase = Early:
contrast estimate SE df z.ratio p.value
Control - Stress 0.490 0.299 Inf 1.639 0.1012
Phase = Late:
contrast estimate SE df z.ratio p.value
Control - Stress 0.091 0.297 Inf 0.306 0.7595
Note: contrasts are still on the inverse scale
Plot: Behavioral
# Within-subject summary of LIASES; `withinvars` is spelled out rather than
# relying on partial argument matching (`withinvar=`)
df_task.nback_summ <- summarySEwithin(df_task.nback, measurevar="LIASES", idvar="sub_id", withinvars=c("Session", "Phase"), na.rm=T)
Automatically converting the following non-factors to factors: Phase
# Numeric Phase for line plotting; Stress level first for consistent colours
df_task.nback_summ$run <- as.numeric(df_task.nback_summ$Phase)
df_task.nback_summ$Session <- relevel(df_task.nback_summ$Session, "Stress")
ggplot(df_task.nback_summ, aes(x=run, y=LIASES, color=Session, fill=Session)) +
geom_line() +
geom_errorbar(aes(ymin=LIASES-se, ymax=LIASES+se, color=Session),size=0.5, width=0.15, na.rm=T) +
ggtheme2

Post-Hoc: ECN by Session
# ECN slope per Session (averaged over Phase), with per-slope z-tests
emtrends(models.nback_liases[[2]], identity ~ Session, var="ECN")
NOTE: Results may be misleading due to involvement in interactions
$emtrends
Session ECN.trend SE df asymp.LCL asymp.UCL
Control -0.144 0.192 Inf -0.52 0.2330
Stress -0.451 0.214 Inf -0.87 -0.0318
Results are averaged over the levels of: Phase
Confidence level used: 0.95
$contrasts
contrast estimate SE df z.ratio p.value
Control -0.144 0.192 Inf -0.748 0.4547
Stress -0.451 0.214 Inf -2.109 0.0350
Results are averaged over the levels of: Phase
# NOTE(review): disabled call below indexes models.nback_liases[[3]], but the
# list only has two elements — re-check the index before re-enabling
#emtrends(models.nback_liases[[3]], pairwise ~ Session | Phase, var="ECN")
# LIASES vs. ECN activity with linear fits per Session
ggplot(df_task.nback_sum, aes(x=ECN, y=LIASES_z, colour=Session)) +
geom_smooth(method="lm", se=T) +
xlab("ECN Activity") + ylab("LIASES Accuracy Score") +
ggtheme2 + scale_color_manual(values=stress_colors)

Memory
For the memory task, we look at the reaction time to the stimuli, split by valence, and the recognition of the images we presented them with outside the scanner. For the RT data, we take a look at individual trials, and for the recognition of the images we look at the average responses per run or phase, depending on which fits the models best. We first remove any RTs that are less than 200 ms, as those are just too quick, and we also rescale the RT variables.
# Drop implausibly fast responses (< 200 ms)
df_task.mem <- df_task.mem %>% subset(rt>=200)
# z-transform, then rescale into a strictly positive range for model fitting
df_task.mem$rt_z <- scale(df_task.mem$rt)
df_task.mem$rt_s <- scales::rescale(df_task.mem$rt_z, to=c(1,6))
Recognition
We next look at the ability to recall the object-location associations. We first will look at overall recall, then take a look at the recall by valence associations to see if the negative or positively valent images are remembered better. First I subset the data, and make the data frame a bit longer so that we have percent correct for each of the neutral and the negative items in one column.
# Helper: pull one valence's percent-correct column into a long-format frame
# with a common `per_cor` column and a `valence` label.
fx.mem_valence <- function(data, per_col, val_label){
  data %>%
    dplyr::select("sub_id", "Session", "Phase", "Run", "run", dplyr::all_of(per_col), "DMN") %>%
    unique() %>%
    dplyr::rename(per_cor = dplyr::all_of(per_col)) %>%
    dplyr::mutate(valence = val_label)
}
# Correct-response rates for neutral and negative items
df_task.mem_neu <- fx.mem_valence(df_task.mem, "mem_neu_per", "neu")
df_task.mem_neg <- fx.mem_valence(df_task.mem, "mem_neg_per", "neg")
# Bind them into one long data frame (negative first, as before)
df_task.mem_cor <- rbind.data.frame(df_task.mem_neg, df_task.mem_neu)
Now we can also check out the distribution of the correct response rates for each of the valences to help pick the most suitable model family. We see that both valences look similar, but skewed left.
Hist.
# Distributions of percent correct by valence (both are left-skewed)
hist(df_task.mem_neu$per_cor)

hist(df_task.mem_neg$per_cor)

Chance Level
# One-sample t-test of overall recall against 0.25
# NOTE(review): mu = 0.25 assumes a four-alternative chance level — confirm
model.memory_chance <- t.test(df_task.mem$mem_tot_per, mu=0.25)
tidy(model.memory_chance)
Models
# Behaviour-only model: Session x Phase x valence on percent correct
model.mem_correct <- lmer(per_cor ~ Session*Phase*valence + (1+Session*Phase|sub_id),
contrasts=list(Session="contr.treatment"),
data=df_task.mem_cor,
control=lmerControl(calc.derivs = FALSE, optCtrl=list(maxfun=100000)) )
#check_model(model.mem_correct)
# Brain model: Session x Phase x DMN interaction, valence as a main effect,
# with a full random-slope structure
model.mem_correct_brain <- lmer(per_cor ~ Session*Phase*DMN + valence + (1+Session*Phase*DMN|sub_id),
contrasts=list(Session="contr.treatment"),
data=df_task.mem_cor,
control=lmerControl(calc.derivs = FALSE, optimizer="bobyqa", optCtrl=list(maxfun=100000)) )
# HTML table for both models
asis_output(tab_model(model.mem_correct, model.mem_correct_brain)$knitr)
|
|
per cor
|
per cor
|
|
Predictors
|
Estimates
|
CI
|
p
|
Estimates
|
CI
|
p
|
|
(Intercept)
|
0.61
|
0.57 – 0.64
|
<0.001
|
0.62
|
0.58 – 0.66
|
<0.001
|
|
Session [Stress]
|
0.01
|
-0.01 – 0.03
|
0.474
|
0.00
|
-0.02 – 0.03
|
0.841
|
|
Phase1
|
-0.01
|
-0.02 – 0.01
|
0.581
|
-0.01
|
-0.03 – 0.01
|
0.510
|
|
valence1
|
0.03
|
0.02 – 0.04
|
<0.001
|
0.03
|
0.02 – 0.03
|
<0.001
|
|
Session [Stress] * Phase1
|
0.01
|
-0.01 – 0.03
|
0.306
|
0.01
|
-0.02 – 0.04
|
0.421
|
Session [Stress] * valence1
|
-0.00
|
-0.02 – 0.01
|
0.856
|
|
|
|
|
Phase1 * valence1
|
-0.00
|
-0.01 – 0.01
|
0.440
|
|
|
|
Session [Stress] * Phase1 * valence1
|
0.00
|
-0.01 – 0.02
|
0.853
|
|
|
|
|
DMN
|
|
|
|
0.02
|
-0.00 – 0.05
|
0.099
|
|
Session [Stress] * DMN
|
|
|
|
-0.01
|
-0.04 – 0.02
|
0.575
|
|
Phase1 * DMN
|
|
|
|
-0.01
|
-0.04 – 0.02
|
0.623
|
(Session [Stress] Phase1) DMN
|
|
|
|
0.00
|
-0.03 – 0.04
|
0.827
|
|
Random Effects
|
|
σ2
|
0.02
|
0.02
|
|
τ00
|
0.02 sub_id
|
0.02 sub_id
|
|
τ11
|
0.00 sub_id.Session1
|
0.00 sub_id.Session1
|
|
|
0.00 sub_id.Phase1
|
0.00 sub_id.Phase1
|
|
|
0.00 sub_id.Session1:Phase1
|
0.00 sub_id.DMN
|
|
|
|
0.00 sub_id.Session1:Phase1
|
|
|
|
0.00 sub_id.Session1:DMN
|
|
|
|
0.00 sub_id.Phase1:DMN
|
|
|
|
0.00 sub_id.Session1:Phase1:DMN
|
|
ρ01
|
-0.04
|
0.04
|
|
|
-0.31
|
-0.45
|
|
|
0.04
|
0.61
|
|
|
|
-0.09
|
|
|
|
0.63
|
|
|
|
-0.37
|
|
|
|
-0.29
|
|
ICC
|
0.48
|
|
|
N
|
70 sub_id
|
70 sub_id
|
|
Observations
|
1676
|
1676
|
|
Marginal R2 / Conditional R2
|
0.015 / 0.489
|
0.033 / NA
|
Post-Hoc: Main
Only the valence effect sticks out, so we run a specific post-hoc test to investigate it further.
emmeans(model.mem_correct, list(pairwise ~Session*valence), by=c("Phase"), adjust = "tukey")
$`emmeans of Session, valence | Phase`
Phase = Early:
Session valence emmean SE df lower.CL upper.CL
Control neg 0.625 0.0215 90.5 0.582 0.667
Stress neg 0.645 0.0206 92.8 0.604 0.686
Control neu 0.581 0.0215 90.5 0.538 0.624
Stress neu 0.601 0.0206 92.8 0.561 0.642
Phase = Late:
Session valence emmean SE df lower.CL upper.CL
Control neg 0.644 0.0230 87.3 0.598 0.689
Stress neg 0.637 0.0228 87.5 0.592 0.683
Control neu 0.583 0.0230 87.3 0.538 0.629
Stress neu 0.582 0.0228 87.5 0.537 0.628
Degrees-of-freedom method: kenward-roger
Confidence level used: 0.95
$`pairwise differences of Session, valence | Phase`
Phase = Early:
2 estimate SE df t.ratio p.value
Control neg - Stress neg -0.020473 0.0211 126 -0.969 0.7671
Control neg - Control neu 0.043547 0.0153 1392 2.839 0.0237
Control neg - Stress neu 0.023124 0.0211 126 1.095 0.6934
Stress neg - Control neu 0.064020 0.0211 126 3.031 0.0155
Stress neg - Stress neu 0.043597 0.0153 1392 2.849 0.0230
Control neu - Stress neu -0.020423 0.0211 126 -0.967 0.7685
Phase = Late:
2 estimate SE df t.ratio p.value
Control neg - Stress neg 0.006374 0.0185 158 0.345 0.9858
Control neg - Control neu 0.060314 0.0153 1392 3.932 0.0005
Control neg - Stress neu 0.061059 0.0185 158 3.305 0.0064
Stress neg - Control neu 0.053940 0.0185 158 2.920 0.0207
Stress neg - Stress neu 0.054685 0.0153 1392 3.574 0.0021
Control neu - Stress neu 0.000746 0.0185 158 0.040 1.0000
Degrees-of-freedom method: kenward-roger
P value adjustment: tukey method for comparing a family of 4 estimates
Plot: Main
df_task.mem_cor_sum <- summarySEwithin(data=df_task.mem_cor, idvar="sub_id", measurevar="per_cor", withinvars=c("Session","Phase","valence"))
Automatically converting the following non-factors to factors: Phase, valence
# Bar plot of recall accuracy by Phase and valence, faceted by Session.
# The summary data are already one row per cell, so draw the bars with
# stat="identity" rather than stat="summary" (which silently defaulted to
# mean_se() on a single value -- same bars, but with a spurious warning).
plot.mem_corr <- ggplot(df_task.mem_cor_sum, aes(y=per_cor, x=Phase, colour=valence, fill=valence, na.rm = TRUE)) +
geom_bar(stat="identity", alpha=.5, position=position_dodge2())+
geom_errorbar(aes(ymin=per_cor-se, ymax=per_cor+se), position=position_dodge(.9),size=1, width=0.45, na.rm=T) +
scale_y_continuous()+ scale_x_discrete()+
ggtitle("Picture-Location Recall")+
ylab("Percent Remembered\n") +
ggtheme2
plot.mem_corr + facet_grid(.~Session)

Post-Hoc: DMN by Phase
emtrends(model.mem_correct_brain, pairwise ~ Phase, var="DMN")
NOTE: Results may be misleading due to involvement in interactions
$emtrends
Phase DMN.trend SE df lower.CL upper.CL
Early 0.0112 0.0177 28.0 -0.0250 0.0474
Late 0.0213 0.0178 28.9 -0.0152 0.0578
Results are averaged over the levels of: Session, valence
Degrees-of-freedom method: kenward-roger
Confidence level used: 0.95
$contrasts
contrast estimate SE df t.ratio p.value
Early - Late -0.0101 0.0252 33.9 -0.399 0.6928
Results are averaged over the levels of: Session, valence
Degrees-of-freedom method: kenward-roger
emmip(model.mem_correct_brain, Phase ~ DMN, cov.reduce = range) + scale_color_manual(values=stress_colors) + ggtheme2
NOTE: Results may be misleading due to involvement in interactions

Reaction Time
We first take a look at the reaction time. This analysis is done on a trial-by-trial basis. We will look at the RT between the stress and control sessions, and also look at the effects of the valence of the images on the reaction time. First we take a look at the distribution of the RTs to better inform us of which model family would fit best. It appears that valence has a significant effect on reaction times, such that participants respond faster.
Hist.
hist(df_task.mem$rt)

Models
# Main
# RT model: Gamma GLMM with inverse link on the rescaled RTs; Session x
# Phase x valence fixed effects and by-subject Session x Phase slopes.
# NOTE(review): Session*Phase and Session*valence are subsets of the full
# Session*Phase*valence term -- lme4 de-duplicates the expanded terms, so
# the fit is unchanged, but the formula could be simplified.
model.memory_rt <- glmer(rt_s ~ Session*Phase + Session*valence +Session*Phase*valence + (1+Session*Phase|sub_id),
contrasts=list(Session="contr.treatment"),
data=df_task.mem,
family=Gamma("inverse"),
control=glmerControl(calc.derivs=FALSE))
# Brain
# DMN version: identity link, fuller random-slope structure, bobyqa optimizer
model.memory_rt_brain <- glmer(rt_s ~ Session*Phase*DMN + (1+Session*Phase*DMN|sub_id),
contrasts=list(Session="contr.treatment"),
data=df_task.mem,
family=Gamma("identity"),
control=glmerControl(calc.derivs=FALSE, optimizer="bobyqa", optCtrl=list(maxfun=100000)))
# HTML Output
asis_output(tab_model(model.memory_rt, model.memory_rt_brain)$knitr)
|
|
rt s
|
rt s
|
|
Predictors
|
Estimates
|
CI
|
p
|
Estimates
|
CI
|
p
|
|
(Intercept)
|
1.62
|
1.60 – 1.63
|
<0.001
|
2.19
|
2.16 – 2.23
|
<0.001
|
|
Session [Stress]
|
1.00
|
1.00 – 1.01
|
0.474
|
-0.01
|
-0.04 – 0.03
|
0.753
|
|
Phase1
|
1.00
|
0.99 – 1.00
|
0.038
|
0.03
|
0.01 – 0.05
|
0.001
|
|
valence1
|
1.00
|
1.00 – 1.00
|
0.062
|
|
|
|
|
Session [Stress] * Phase1
|
1.00
|
1.00 – 1.01
|
0.438
|
-0.02
|
-0.05 – 0.01
|
0.220
|
Session [Stress] * valence1
|
1.00
|
1.00 – 1.01
|
0.368
|
|
|
|
|
Phase1 * valence1
|
1.00
|
1.00 – 1.00
|
0.195
|
|
|
|
Session [Stress] * Phase1 * valence1
|
1.00
|
1.00 – 1.00
|
0.935
|
|
|
|
|
DMN
|
|
|
|
-0.02
|
-0.05 – 0.01
|
0.191
|
|
Session [Stress] * DMN
|
|
|
|
0.03
|
-0.01 – 0.08
|
0.146
|
|
Phase1 * DMN
|
|
|
|
0.05
|
0.02 – 0.08
|
0.003
|
(Session [Stress] Phase1) DMN
|
|
|
|
-0.01
|
-0.05 – 0.04
|
0.703
|
|
Random Effects
|
|
σ2
|
0.07
|
0.07
|
|
τ00
|
0.00 sub_id
|
0.01 sub_id
|
|
τ11
|
0.00 sub_id.Session1
|
0.00 sub_id.Session1
|
|
|
0.00 sub_id.Phase1
|
0.00 sub_id.Phase1
|
|
|
0.00 sub_id.Session1:Phase1
|
0.00 sub_id.DMN
|
|
|
|
0.00 sub_id.Session1:Phase1
|
|
|
|
0.00 sub_id.Session1:DMN
|
|
|
|
0.00 sub_id.Phase1:DMN
|
|
|
|
0.00 sub_id.Session1:Phase1:DMN
|
|
ρ01
|
-0.01
|
-0.01
|
|
|
-0.04
|
-0.06
|
|
|
0.11
|
-0.96
|
|
|
|
0.15
|
|
|
|
-0.12
|
|
|
|
0.24
|
|
|
|
0.23
|
|
ICC
|
0.01
|
|
|
N
|
70 sub_id
|
70 sub_id
|
|
Observations
|
17529
|
17529
|
|
Marginal R2 / Conditional R2
|
0.000 / 0.011
|
0.020 / NA
|
Post-Hoc: Main
emmeans(model.memory_rt, list(pairwise ~ Session*Phase), by=c("Phase"), adjust = "tukey")
NOTE: Results may be misleading due to involvement in interactions
Note: Use 'contrast(regrid(object), ...)' to obtain contrasts of back-transformed estimates
$`emmeans of Session | Phase`
Phase = Early:
Session emmean SE df asymp.LCL asymp.UCL
Control 0.475 0.00417 Inf 0.467 0.483
Stress 0.480 0.00409 Inf 0.472 0.488
Phase = Late:
Session emmean SE df asymp.LCL asymp.UCL
Control 0.484 0.00425 Inf 0.476 0.493
Stress 0.485 0.00436 Inf 0.476 0.493
Results are averaged over the levels of: valence
Results are given on the inverse (not the response) scale.
Confidence level used: 0.95
$`pairwise differences of Session | Phase`
Phase = Early:
2 estimate SE df z.ratio p.value
Control - Stress -0.004940 0.00455 Inf -1.085 0.2780
Phase = Late:
2 estimate SE df z.ratio p.value
Control - Stress -0.000372 0.00491 Inf -0.076 0.9396
Results are averaged over the levels of: valence
Note: contrasts are still on the inverse scale
Plot: Main
df_task.mem_sum <- summarySEwithin(df_task.mem, measurevar="rt",idvar="sub_id", withinvars= c("Session","Phase","valence"))
Automatically converting the following non-factors to factors: Phase
# Numeric x-position so the two phases can be joined by a line
df_task.mem_sum$run <- as.numeric((df_task.mem_sum$Phase))
# Put the negative level first so colour/legend order starts with negative
df_task.mem_sum$valence <- relevel(df_task.mem_sum$valence, "negative")
# Line plot of mean RT by phase and valence; error bars are within-subject
# SEs. Fix: ymin/ymax were swapped (rt+se / rt-se) -- the bar rendered the
# same, but the aesthetics now read correctly and match the other plots.
plot.mem_rt <- ggplot(df_task.mem_sum, aes(x=run, y=rt, colour=valence)) +
geom_line() +
geom_errorbar(aes(ymin=rt-se, ymax=rt+se),width=0.2, na.rm=TRUE) +
scale_x_continuous(breaks=c(1,2), labels = c("Early", "Late")) +
ggtheme2
plot.mem_rt + facet_grid(.~Session)

Post-Hoc: Brain
joint_tests(model.memory_rt_brain, by=c("Session","Phase"))
Session = Control, Phase = Early:
model term df1 df2 F.ratio p.value
DMN 1 Inf 1.768 0.1836
Session = Stress, Phase = Early:
model term df1 df2 F.ratio p.value
DMN 1 Inf 4.950 0.0261
Session = Control, Phase = Late:
model term df1 df2 F.ratio p.value
DMN 1 Inf 7.306 0.0069
Session = Stress, Phase = Late:
model term df1 df2 F.ratio p.value
DMN 1 Inf 1.464 0.2264
ggplot(df_task.mem, aes(x=DMN, y=rt_s, colour=Session)) + geom_smooth(method="lm") + ggtheme2 + facet_grid(.~Phase) + scale_color_manual(values=stress_colors)

Oddball
The oddball data have already been processed, so there is not much more we need to do there; we can begin directly with the statistics.
d-Prime
Hist.
hist(df_task.odd$dprime)

Models
# Signals: "" = behaviour-only model; "SN" = add salience-network activity
signals=c("", "SN" )
# Fit both d-prime models in parallel via the project helper fx.par_model
# NOTE(review): both branches of the `re=` ifelse build the identical string
# "1+Session+Run" -- presumably the SN term was meant to enter the random
# effects in the second branch (cf. the RT models below, which append X);
# confirm against fx.par_model before changing.
models.odd_dprime <- mcmapply(signals, FUN=function(X){fx.par_model(signal=X,
dv="dprime",
iv=ifelse(X=="", "Session*Run", "1+Session*Run*"),
re=ifelse(X=="", "1+Session+Run", paste("1+Session+Run")),
sub_id="sub_nr",
fam="lmer",
data=df_task.odd)}, mc.cores=5)
# Print HTML Output
asis_output(tab_model(models.odd_dprime, dv.labels = c("Signal", "Signal ~ SN"), show.stat =T, show.se=T)$knitr)
|
|
Signal
|
Signal ~ SN
|
|
Predictors
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
0.40
|
0.05
|
0.31 – 0.49
|
8.58
|
<0.001
|
0.34
|
0.07
|
0.20 – 0.47
|
4.92
|
<0.001
|
|
Session [Stress]
|
-0.43
|
0.06
|
-0.54 – -0.32
|
-7.39
|
<0.001
|
-0.38
|
0.09
|
-0.56 – -0.20
|
-4.10
|
<0.001
|
|
Run1
|
0.01
|
0.03
|
-0.05 – 0.07
|
0.38
|
0.703
|
0.04
|
0.06
|
-0.07 – 0.15
|
0.69
|
0.492
|
|
Session [Stress] * Run1
|
-0.04
|
0.04
|
-0.12 – 0.05
|
-0.90
|
0.369
|
-0.08
|
0.08
|
-0.24 – 0.07
|
-1.04
|
0.299
|
|
SN
|
|
|
|
|
|
0.05
|
0.05
|
-0.04 – 0.15
|
1.15
|
0.250
|
|
Session [Stress] * SN
|
|
|
|
|
|
-0.05
|
0.06
|
-0.17 – 0.08
|
-0.75
|
0.452
|
|
Run1 * SN
|
|
|
|
|
|
-0.02
|
0.04
|
-0.10 – 0.06
|
-0.43
|
0.667
|
(Session [Stress] * Run1) * SN
|
|
|
|
|
|
0.03
|
0.06
|
-0.08 – 0.15
|
0.56
|
0.575
|
|
Random Effects
|
|
σ2
|
0.10
|
0.10
|
|
τ00
|
0.02 sub_nr
|
0.02 sub_nr
|
|
τ11
|
0.02 sub_nr.Session1
|
0.02 sub_nr.Session1
|
|
|
0.00 sub_nr.Run1
|
0.00 sub_nr.Run1
|
|
ρ01
|
0.57
|
0.58
|
|
|
-0.82
|
-0.88
|
|
N
|
63 sub_nr
|
63 sub_nr
|
|
Observations
|
222
|
222
|
|
Marginal R2 / Conditional R2
|
0.319 / NA
|
0.317 / NA
|
Plot: Main
# Violin + boxplot of d-prime per scan session; grey line marks chance (0)
plot.odd_sess <- ggplot(df_task.odd, aes(x=Scan, y=dprime, colour=Scan, fill=Scan)) +
geom_violin(alpha=0.5) +
geom_boxplot(width=0.1, alpha=0.2) +
geom_hline(yintercept=0, colour="grey") +
ggtheme2
plot.odd_sess

Post-Hoc: Brain
joint_tests(models.odd_dprime[[2]], by=c("Session", "Run"))
Session = Control, Run = Early:
model term df1 df2 F.ratio p.value
SN 1 87.57 0.328 0.5686
Session = Stress, Run = Early:
model term df1 df2 F.ratio p.value
SN 1 85.42 0.118 0.7318
Session = Control, Run = Late:
model term df1 df2 F.ratio p.value
SN 1 101.58 1.121 0.2922
Session = Stress, Run = Late:
model term df1 df2 F.ratio p.value
SN 1 86.14 0.022 0.8818
Hits
Hist.
We first check the hits. It appears that there is a significant difference in the hit-rate between the two sessions, and between the runs.
hist(df_task.odd$odd_hits)

Models
# Contrasts to investigate: behaviour-only ("") and SN-augmented models
signals=c("", "SN")
# Same parallel two-model setup as for d-prime, now with hit counts as outcome
models.odd_hits <- mcmapply(signals, FUN=function(X){fx.par_model(signal=X,
dv="odd_hits",
iv=ifelse(X=="", "Session*Run", "1+Session*Run*"),
re=ifelse(X=="", "1+Session+Run", paste("1+Session+Run")),
sub_id="sub_nr",
fam="lmer",
data=df_task.odd)}, mc.cores=5)
# Print HTML Output
asis_output(tab_model(models.odd_hits, dv.labels = c("Signal", "Signal ~ SN"), show.stat =T, show.se=T)$knitr)
|
|
Signal
|
Signal ~ SN
|
|
Predictors
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
20.34
|
0.69
|
18.99 – 21.69
|
29.46
|
<0.001
|
20.21
|
0.95
|
18.35 – 22.07
|
21.34
|
<0.001
|
|
Session [Stress]
|
-3.40
|
0.68
|
-4.74 – -2.06
|
-4.97
|
<0.001
|
-3.88
|
1.26
|
-6.36 – -1.41
|
-3.08
|
0.002
|
|
Run1
|
-1.85
|
0.43
|
-2.68 – -1.01
|
-4.34
|
<0.001
|
-1.77
|
0.73
|
-3.20 – -0.33
|
-2.42
|
0.016
|
|
Session [Stress] * Run1
|
0.22
|
0.57
|
-0.90 – 1.34
|
0.38
|
0.701
|
1.06
|
1.11
|
-1.12 – 3.24
|
0.95
|
0.340
|
|
SN
|
|
|
|
|
|
0.12
|
0.59
|
-1.04 – 1.28
|
0.20
|
0.842
|
|
Session [Stress] * SN
|
|
|
|
|
|
0.50
|
0.92
|
-1.30 – 2.30
|
0.54
|
0.587
|
|
Run1 * SN
|
|
|
|
|
|
-0.07
|
0.55
|
-1.14 – 1.00
|
-0.13
|
0.900
|
(Session [Stress] * Run1) * SN
|
|
|
|
|
|
-0.79
|
0.83
|
-2.42 – 0.83
|
-0.96
|
0.339
|
|
Random Effects
|
|
σ2
|
17.02
|
17.40
|
|
τ00
|
24.18 sub_nr
|
24.36 sub_nr
|
|
τ11
|
2.15 sub_nr.Session1
|
2.17 sub_nr.Session1
|
|
|
1.10 sub_nr.Run1
|
0.87 sub_nr.Run1
|
|
ρ01
|
-0.46
|
-0.45
|
|
|
-0.11
|
-0.11
|
|
ICC
|
0.60
|
0.59
|
|
N
|
63 sub_nr
|
63 sub_nr
|
|
Observations
|
222
|
222
|
|
Marginal R2 / Conditional R2
|
0.126 / 0.649
|
0.132 / 0.646
|
Plot: Main
# Violin + boxplot of hit counts per scan session
plot.odd_hits <- ggplot(df_task.odd, aes(x=Scan, y=odd_hits, colour=Scan, fill=Scan)) +
geom_violin(alpha=0.5) +
geom_boxplot(width=0.1, alpha=0.2) +
ggtheme2
plot.odd_hits

Post-Hoc: Brain
joint_tests(models.odd_hits[[2]], by=c("Session", "Run"))
Session = Control, Run = Early:
model term df1 df2 F.ratio p.value
SN 1 88.29 0.004 0.9526
Session = Stress, Run = Early:
model term df1 df2 F.ratio p.value
SN 1 97.52 0.063 0.8024
Session = Control, Run = Late:
model term df1 df2 F.ratio p.value
SN 1 97.12 0.048 0.8266
Session = Stress, Run = Late:
model term df1 df2 F.ratio p.value
SN 1 98.67 2.142 0.1465
False Alarms
We also see that participants make more errors. So we have fewer hits and more false alarms in the stress session.
Models
# Loop over behaviour-only ("") and SN-augmented models
signals=c("", "SN")
# Same parallel two-model setup, with false-alarm counts as the outcome
models.odd_fas <- mcmapply(signals, FUN=function(X){fx.par_model(signal=X,
dv="odd_FA",
iv=ifelse(X=="", "Session*Run", "1+Session*Run*"),
re=ifelse(X=="", "1+Session+Run", paste("1+Session+Run")),
sub_id="sub_nr",
fam="lmer",
data=df_task.odd)}, mc.cores=5)
# HTML Output
tab.odd_fas <- tab_model(models.odd_fas, show.se=T, show.stat=T, dv.labels=c("Signal", "Signal ~ SN"))
asis_output(tab.odd_fas$knitr)
|
|
Signal
|
Signal ~ SN
|
|
Predictors
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
7.40
|
0.36
|
6.70 – 8.11
|
20.60
|
<0.001
|
8.08
|
0.50
|
7.09 – 9.07
|
16.03
|
<0.001
|
|
Session [Stress]
|
1.67
|
0.43
|
0.82 – 2.52
|
3.86
|
<0.001
|
0.62
|
0.72
|
-0.79 – 2.03
|
0.86
|
0.391
|
|
Run1
|
-1.07
|
0.23
|
-1.52 – -0.62
|
-4.65
|
<0.001
|
-1.45
|
0.41
|
-2.25 – -0.65
|
-3.54
|
<0.001
|
|
Session [Stress] * Run1
|
0.31
|
0.31
|
-0.30 – 0.93
|
1.01
|
0.315
|
1.17
|
0.62
|
-0.05 – 2.38
|
1.88
|
0.060
|
|
SN
|
|
|
|
|
|
-0.58
|
0.34
|
-1.24 – 0.08
|
-1.73
|
0.083
|
|
Session [Stress] * SN
|
|
|
|
|
|
0.95
|
0.51
|
-0.05 – 1.95
|
1.85
|
0.064
|
|
Run1 * SN
|
|
|
|
|
|
0.28
|
0.30
|
-0.31 – 0.88
|
0.93
|
0.352
|
(Session [Stress] * Run1) * SN
|
|
|
|
|
|
-0.74
|
0.46
|
-1.64 – 0.16
|
-1.61
|
0.107
|
|
Random Effects
|
|
σ2
|
5.12
|
5.54
|
|
τ00
|
4.34 sub_nr
|
4.12 sub_nr
|
|
τ11
|
1.38 sub_nr.Session1
|
1.07 sub_nr.Session1
|
|
|
0.23 sub_nr.Run1
|
0.13 sub_nr.Run1
|
|
ρ01
|
-0.15
|
-0.24
|
|
|
-0.21
|
-0.20
|
|
ICC
|
0.47
|
0.44
|
|
N
|
63 sub_nr
|
63 sub_nr
|
|
Observations
|
222
|
222
|
|
Marginal R2 / Conditional R2
|
0.135 / 0.544
|
0.151 / 0.521
|
Plot: Main
# Violin + boxplot of false-alarm counts per scan session
plot.odd_fas <- ggplot(df_task.odd, aes(x=Scan, y=odd_FA, colour=Scan, fill=Scan)) +
geom_violin(alpha=0.5) +
geom_boxplot(width=0.1, alpha=0.2) +
ggtheme2
plot.odd_fas

Post-Hoc: SN by Session
emtrends(models.odd_fas[[2]], pairwise ~ Session, var="SN")
NOTE: Results may be misleading due to involvement in interactions
$emtrends
Session SN.trend SE df lower.CL upper.CL
Control -0.582 0.350 142 -1.275 0.11
Stress 0.366 0.406 149 -0.436 1.17
Results are averaged over the levels of: Run
Degrees-of-freedom method: kenward-roger
Confidence level used: 0.95
$contrasts
contrast estimate SE df t.ratio p.value
Control - Stress -0.948 0.532 193 -1.783 0.0762
Results are averaged over the levels of: Run
Degrees-of-freedom method: kenward-roger
joint_tests(models.odd_fas[[2]], by=c("Session"))
Session = Control:
model term df1 df2 F.ratio p.value
Run 1 111.50 21.681 <.0001
SN 1 142.00 2.761 0.0988
Run:SN 1 144.07 0.800 0.3727
Session = Stress:
model term df1 df2 F.ratio p.value
Run 1 111.64 11.243 0.0011
SN 1 149.25 0.813 0.3688
Run:SN 1 139.00 1.636 0.2030
# Linear fits of false alarms on SN activity, by Session, faceted by Run
plot.sn_fas <- ggplot(df_task.odd, aes(x=SN, y=odd_FA, colour=Session, fill=Session)) +
geom_smooth(method="lm", formula="y~x", alpha=0.15) +
#geom_point(alpha=0.5)+
scale_color_manual(values=stress_colors) + scale_fill_manual(values=stress_colors) +
ylab("False Alarms") +
ggtheme2
plot.sn_fas + facet_grid(~Run)

dPrime by Valence
Now we take a look at the effects of stress and valence on recall. So we do something similar here to what we did before, where we split the positive and negative values and then merge them into a long data frame (the same as we did for the memory task recognition trials).
# Per-run d-prime for the POSITIVE oddball items, tagged with Valence
df_task.odd_pos <- df_task.odd %>%
  dplyr::select(sub_nr, Session, Run, Scan, dprime_pos, SN, ECN, SN_ECN) %>%
  unique() %>%
  dplyr::rename(dprime = dprime_pos) %>%
  mutate(Valence = "Positive")
# Same extraction for the NEGATIVE items
df_task.odd_neg <- df_task.odd %>%
  dplyr::select(sub_nr, Session, Run, Scan, dprime_neg, SN, ECN, SN_ECN) %>%
  unique() %>%
  dplyr::rename(dprime = dprime_neg) %>%
  mutate(Valence = "Negative")
# Stack the two valence subsets into one long data frame
df_task.odd_val <- rbind.data.frame(df_task.odd_pos, df_task.odd_neg)
Models
# Loop over behaviour-only ("") and SN-augmented models
signals=c("", "SN")
# d-prime by valence: Session x Run x Valence fixed effects, fitted in parallel
models.odd_val <- mcmapply(signals, FUN=function(X){fx.par_model(signal=X,
dv="dprime",
iv=ifelse(X=="", "Session*Run*Valence", "1+Session*Run*Valence*"),
re=ifelse(X=="", "1+Session+Run", paste("1+Session+Run")),
sub_id="sub_nr",
fam="lmer",
data=df_task.odd_val)}, mc.cores=5)
# Print HTML Output
asis_output(tab_model(models.odd_val,
dv.labels = c("Signal", "Signal ~ SN"),
show.stat =T, show.se=T)$knitr)
|
|
Signal
|
Signal ~ SN
|
|
Predictors
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
0.39
|
0.05
|
0.30 – 0.48
|
8.49
|
<0.001
|
0.33
|
0.07
|
0.20 – 0.46
|
4.86
|
<0.001
|
|
Session [Stress]
|
-0.42
|
0.06
|
-0.54 – -0.31
|
-7.37
|
<0.001
|
-0.38
|
0.09
|
-0.56 – -0.20
|
-4.16
|
<0.001
|
|
Run1
|
0.01
|
0.03
|
-0.05 – 0.07
|
0.34
|
0.736
|
0.04
|
0.06
|
-0.07 – 0.14
|
0.67
|
0.505
|
|
Valence1
|
-0.01
|
0.03
|
-0.07 – 0.04
|
-0.48
|
0.633
|
0.01
|
0.05
|
-0.09 – 0.10
|
0.12
|
0.904
|
|
Session [Stress] * Run1
|
-0.03
|
0.04
|
-0.12 – 0.05
|
-0.76
|
0.449
|
-0.08
|
0.08
|
-0.24 – 0.08
|
-0.98
|
0.327
|
Session [Stress] * Valence1
|
0.04
|
0.04
|
-0.04 – 0.12
|
0.91
|
0.363
|
0.05
|
0.07
|
-0.09 – 0.20
|
0.73
|
0.462
|
|
Run1 * Valence1
|
0.03
|
0.03
|
-0.02 – 0.09
|
1.15
|
0.249
|
0.01
|
0.05
|
-0.09 – 0.11
|
0.24
|
0.808
|
Session [Stress] * Run1 * Valence1
|
-0.02
|
0.04
|
-0.11 – 0.06
|
-0.53
|
0.595
|
0.03
|
0.07
|
-0.11 – 0.18
|
0.47
|
0.641
|
|
SN
|
|
|
|
|
|
0.05
|
0.05
|
-0.04 – 0.14
|
1.12
|
0.262
|
|
Session [Stress] * SN
|
|
|
|
|
|
-0.04
|
0.06
|
-0.16 – 0.08
|
-0.64
|
0.523
|
|
Run1 * SN
|
|
|
|
|
|
-0.02
|
0.04
|
-0.10 – 0.06
|
-0.44
|
0.660
|
|
Valence1 * SN
|
|
|
|
|
|
-0.02
|
0.04
|
-0.09 – 0.05
|
-0.46
|
0.644
|
(Session [Stress] * Run1) * SN
|
|
|
|
|
|
0.03
|
0.06
|
-0.08 – 0.15
|
0.57
|
0.569
|
(Session [Stress] Valence1) SN
|
|
|
|
|
|
-0.01
|
0.05
|
-0.12 – 0.09
|
-0.21
|
0.835
|
|
(Run1 * Valence1) * SN
|
|
|
|
|
|
0.02
|
0.04
|
-0.05 – 0.09
|
0.53
|
0.599
|
(Session [Stress] * Run1 * Valence1) * SN
|
|
|
|
|
|
-0.05
|
0.05
|
-0.15 – 0.06
|
-0.84
|
0.400
|
|
Random Effects
|
|
σ2
|
0.20
|
0.20
|
|
τ00
|
0.02 sub_nr
|
0.02 sub_nr
|
|
τ11
|
0.02 sub_nr.Session1
|
0.02 sub_nr.Session1
|
|
|
0.00 sub_nr.Run1
|
0.00 sub_nr.Run1
|
|
ρ01
|
0.59
|
0.58
|
|
|
-0.86
|
-0.92
|
|
N
|
63 sub_nr
|
63 sub_nr
|
|
Observations
|
444
|
444
|
|
Marginal R2 / Conditional R2
|
0.188 / NA
|
0.191 / NA
|
Plot: Main
# Violin + boxplot of d-prime per scan session, faceted by Valence below
plot.odd_valence <- ggplot(df_task.odd_val, aes(x=Scan, y=dprime, colour=Scan, fill=Scan)) +
geom_violin(alpha=0.5) +
geom_boxplot(width=0.1, alpha=0.2) +
geom_hline(yintercept=0, colour="grey") +
ggtheme2
plot.odd_valence + facet_grid(. ~ Valence)

Post-Hoc: Brain
joint_tests(models.odd_val[[2]], by=c("Session", "Run"))
Session = Control, Run = Early:
model term df1 df2 F.ratio p.value
Valence 1 259.80 0.211 0.6464
SN 1 99.13 0.295 0.5882
Valence:SN 1 259.80 0.002 0.9638
Session = Stress, Run = Early:
model term df1 df2 F.ratio p.value
Valence 1 259.80 1.071 0.3017
SN 1 94.03 0.198 0.6577
Valence:SN 1 259.80 1.005 0.3171
Session = Control, Run = Late:
model term df1 df2 F.ratio p.value
Valence 1 259.80 1.298 0.2556
SN 1 124.94 1.117 0.2926
Valence:SN 1 259.80 0.499 0.4808
Session = Stress, Run = Late:
model term df1 df2 F.ratio p.value
Valence 1 259.80 0.075 0.7845
SN 1 95.63 0.004 0.9497
Valence:SN 1 259.80 0.001 0.9753
Hit+Miss Joint Plot
# Composite oddball figure: top row A-C = d-prime, hits, and false alarms per
# session; bottom panel D = false alarms vs SN activity. Saved as Figure 5.
# NOTE(review): layout relies on NULL spacer columns and nested ggarrange
# calls -- keep widths/labels in sync if panels are added or removed.
plot.odd_results <- ggarrange(
ggarrange(ggarrange(NULL, (plot.odd_sess + xlab("") + labs(color="", fill="") + ylab("dPrime (a.u.)") + theme(axis.text.x=element_blank()) ),
NULL, (plot.odd_hits + xlab("") + ylab("Hits (count)")+theme(axis.text.x=element_blank()) ),
NULL, (plot.odd_fas + xlab("") + ylab("False Alarms (count)")+theme(axis.text.x=element_blank()) ),
ncol=6, common.legend = T, legend="bottom", widths=c(0.05, 1, 0.05, 1, 0.05, 1), labels =c("A", "", "B", "", "C")),
(plot.sn_fas+ylab("False Alarms (count)")+xlab("SN Activity (a.u.)")), nrow=2, legend="bottom", labels=c("", "D")))
plot.odd_results
# Write the assembled figure to disk for the manuscript
ggsave("figures/Figure_5_OddBeh.svg", device="svg", dpi =300, width=5, height = 6)

RT
# Oddball RT trials: task 1, standard (valence == 0) stimuli only, keeping
# responses slower than 200 ms (rt > 200 already implies rt != 0).
# NOTE(review): the memory task uses rt >= 200 -- confirm whether the strict
# inequality here is intentional.
df_task.oddrt <- df_task %>%
  subset(task == 1 & valence == 0 & rt > 200)
# Recover the stimulus valence label from the middle part of the file name
df_task.oddrt$valence <- str_split_fixed(df_task.oddrt$stimulus, pattern = "_", n = 3)[, 2]
Models
# Main ("" = behaviour-only) vs SN-augmented oddball RT models
signals=c("", "SN")
# Trial-level Gamma(log) GLMMs; unlike the count models above, the signal X
# is appended to the random-effects formula in the SN branch.
# NOTE(review): fam is a Gamma family object here (vs the "lmer" string used
# elsewhere) -- the repeated "condition has length > 1" warning below likely
# comes from fx.par_model comparing this non-scalar fam; verify the helper.
models.odd_rts <- mcmapply(signals, FUN=function(X){fx.par_model(signal=X,
dv="rt",
iv=ifelse(X=="", "Session*Phase", "1+Session*Phase*"),
re=ifelse(X=="", "1+Session*Phase", paste("1+Session*Phase+", X, sep="")),
sub_id="sub_id",
fam=Gamma("log"),
data=df_task.oddrt)}, mc.cores=5)
the condition has length > 1 and only the first element will be usedthe condition has length > 1 and only the first element will be used
# Print HTML Output
asis_output(tab_model(models.odd_rts, dv.labels = c("Signal", "Signal ~ SN"), show.stat =T, show.se=T)$knitr)
|
|
Signal
|
Signal ~ SN
|
|
Predictors
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
Estimates
|
std. Error
|
CI
|
Statistic
|
p
|
|
(Intercept)
|
603.46
|
5.97
|
591.87 – 615.27
|
647.37
|
<0.001
|
595.57
|
8.03
|
580.03 – 611.53
|
473.61
|
<0.001
|
|
Session [Stress]
|
0.97
|
0.01
|
0.96 – 0.99
|
-3.09
|
0.002
|
0.97
|
0.01
|
0.95 – 1.00
|
-1.65
|
0.098
|
|
Phase1
|
0.98
|
0.01
|
0.97 – 0.99
|
-4.09
|
<0.001
|
0.99
|
0.01
|
0.97 – 1.01
|
-1.22
|
0.223
|
|
Session [Stress] * Phase1
|
1.00
|
0.01
|
0.98 – 1.01
|
-0.69
|
0.492
|
0.99
|
0.01
|
0.96 – 1.01
|
-1.04
|
0.299
|
|
SN
|
|
|
|
|
|
1.01
|
0.01
|
0.99 – 1.03
|
1.29
|
0.196
|
|
Session [Stress] * SN
|
|
|
|
|
|
1.00
|
0.01
|
0.98 – 1.02
|
-0.27
|
0.787
|
|
Phase1 * SN
|
|
|
|
|
|
0.99
|
0.01
|
0.98 – 1.00
|
-1.41
|
0.160
|
(Session [Stress] Phase1) SN
|
|
|
|
|
|
1.01
|
0.01
|
0.99 – 1.03
|
0.75
|
0.455
|
|
Random Effects
|
|
σ2
|
0.06
|
0.06
|
|
τ00
|
0.01 sub_id
|
0.01 sub_id
|
|
τ11
|
0.00 sub_id.Session1
|
0.00 sub_id.Session1
|
|
|
0.00 sub_id.Phase1
|
0.00 sub_id.Phase1
|
|
|
0.00 sub_id.Session1:Phase1
|
0.00 sub_id.SN
|
|
|
|
0.00 sub_id.Session1:Phase1
|
|
ρ01
|
-0.01
|
-0.01
|
|
|
-0.09
|
-0.05
|
|
|
-0.10
|
-0.58
|
|
|
|
-0.26
|
|
ICC
|
0.08
|
0.08
|
|
N
|
70 sub_id
|
70 sub_id
|
|
Observations
|
11064
|
11064
|
|
Marginal R2 / Conditional R2
|
0.011 / 0.089
|
0.013 / 0.092
|
Post-Hoc: Main
emmeans(models.odd_rts[[1]], list(pairwise ~Session*Phase), by=c("Phase"), adjust = "tukey")
$`emmeans of Session | Phase`
Phase = Early:
Session emmean SE df asymp.LCL asymp.UCL
Control 6.381 0.01084 Inf 6.360 6.402
Stress 6.348 0.01101 Inf 6.327 6.370
Phase = Late:
Session emmean SE df asymp.LCL asymp.UCL
Control 6.424 0.01155 Inf 6.402 6.447
Stress 6.401 0.01123 Inf 6.379 6.423
Results are given on the log (not the response) scale.
Confidence level used: 0.95
$`pairwise differences of Session | Phase`
Phase = Early:
2 estimate SE df z.ratio p.value
Control - Stress 0.0330 0.0114 Inf 2.895 0.0038
Phase = Late:
2 estimate SE df z.ratio p.value
Control - Stress 0.0233 0.0116 Inf 2.001 0.0454
Results are given on the log (not the response) scale.
Plot: Main
df_task.oddrt_summ <- summarySEwithin(df_task.oddrt,idvar="sub_id", measurevar="rt", withinvars =c("Session", "Phase"))
Automatically converting the following non-factors to factors: Phase
# Numeric phase so the two phases can be joined by a line on a continuous axis
df_task.oddrt_summ$Phase <- as.numeric(as.factor(df_task.oddrt_summ$Phase))
# Put the Stress level first so it leads the colour/legend order
df_task.oddrt_summ$Session <- relevel(df_task.oddrt_summ$Session, "Stress")
# Mean oddball RT by phase and session (error bars = within-subject SE)
ggplot(df_task.oddrt_summ, aes(x=Phase, y=rt, color=Session, fill=Session)) +
geom_line() +
geom_errorbar(aes(ymin=rt-se, ymax=rt+se, color=Session),size=1, width=0.15, na.rm=T)+
ggtheme2 + scale_x_continuous(breaks=c(1,2,3,4,5,6))

Post-Hoc: Brain
joint_tests(models.odd_rts[[2]], by=c("Session", "Phase"), adjust="none")
Session = Control, Phase = Early:
model term df1 df2 F.ratio p.value
SN 1 Inf 0.002 0.9634
Session = Stress, Phase = Early:
model term df1 df2 F.ratio p.value
SN 1 Inf 0.208 0.6484
Session = Control, Phase = Late:
model term df1 df2 F.ratio p.value
SN 1 Inf 3.336 0.0678
Session = Stress, Phase = Late:
model term df1 df2 F.ratio p.value
SN 1 Inf 0.967 0.3254
FA by RT
Models
df_task.odd_rt_fa <- df_task.oddrt %>% dplyr::group_by(sub_nr, Session, Phase) %>% dplyr::summarise(rt=mean(rt)) %>% dplyr::rename(Run=Phase) %>% ungroup() %>% merge(., df_task.odd, by=c("sub_nr", "Session", "Run"))
`summarise()` has grouped output by 'sub_nr', 'Session'. You can override using the `.groups` argument.
# Mean RT predicted by Session x Run x false alarms, with by-subject random
# slopes for Session and Run.
# Fix: the lmer argument is `contrasts` -- the original `contrast=` only
# worked through R's partial argument matching; spelled out (and F -> FALSE).
model.odd_fa_rt <- lmer(rt ~ Session*Run*odd_FA + (1+Session+Run|sub_nr) ,
data=df_task.odd_rt_fa,
contrasts=list(Session="contr.treatment"),
control=lmerControl(calc.derivs = FALSE, optimizer="bobyqa", optCtrl=list(maxfun=100000)))
# Render the model as an HTML table in the knitted output
asis_output(tab_model(model.odd_fa_rt)$knitr)
|
|
rt
|
|
Predictors
|
Estimates
|
CI
|
p
|
|
(Intercept)
|
631.60
|
596.33 – 666.86
|
<0.001
|
|
Session [Stress]
|
-33.31
|
-71.26 – 4.64
|
0.085
|
|
Run1
|
-43.64
|
-61.64 – -25.65
|
<0.001
|
|
odd_FA
|
-2.74
|
-6.10 – 0.62
|
0.110
|
|
Session [Stress] * Run1
|
29.11
|
2.47 – 55.75
|
0.032
|
|
Session [Stress] * odd_FA
|
2.20
|
-1.99 – 6.38
|
0.304
|
|
Run1 * odd_FA
|
3.16
|
0.93 – 5.40
|
0.005
|
(Session [Stress] * Run1) * odd_FA
|
-3.69
|
-6.69 – -0.69
|
0.016
|
|
Random Effects
|
|
σ2
|
817.56
|
|
τ00 sub_nr
|
8707.72
|
|
τ11 sub_nr.Session1
|
803.41
|
|
τ11 sub_nr.Run1
|
123.85
|
|
ρ01
|
0.11
|
|
|
-0.66
|
|
ICC
|
0.92
|
|
N sub_nr
|
63
|
|
Observations
|
222
|
|
Marginal R2 / Conditional R2
|
0.049 / 0.920
|
check_model(model.odd_fa_rt)
`guides(<scale> = FALSE)` is deprecated. Please use `guides(<scale> = "none")` instead.

#Gaus 2670 #GausLog 3336.29 # Gamma 7770 #Gamma Log
Post-Hoc
joint_tests(model.odd_fa_rt, by=c("Session", "Run"), adjust="none")
Session = Control, Run = Early:
model term df1 df2 F.ratio p.value
odd_FA 1 109.60 0.033 0.8571
Session = Stress, Run = Early:
model term df1 df2 F.ratio p.value
odd_FA 1 107.68 0.355 0.5527
Session = Control, Run = Late:
model term df1 df2 F.ratio p.value
odd_FA 1 116.29 9.712 0.0023
Session = Stress, Run = Late:
model term df1 df2 F.ratio p.value
odd_FA 1 107.94 0.000 0.9934
Plot
emmip(model.odd_fa_rt, Session + Run ~ odd_FA, cov.reduce = range)+ ggtheme2# + scale_color_manual(values=stress_colors)

rcorr(df_task.odd_rt_fa$rt, df_task.odd_rt_fa$odd_FA)
x y
x 1.00 -0.07
y -0.07 1.00
n= 222
P
x y
x 0.2951
y 0.2951
# Linear fit of RT on false alarms, faceted by Session and Run
plot.odd_fa_rt <- ggplot(df_task.odd_rt_fa, aes(x=odd_FA, y=rt)) + geom_smooth(method="lm" )
plot.odd_fa_rt + facet_grid(Session~Run)
